home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
pcr
/
pcr4_4.lha
/
DIST
/
gc
/
GCreclaim.c,v
< prev
next >
Wrap
Text File
|
1991-06-17
|
28KB
|
971 lines
head 1.1;
access;
symbols;
locks; strict;
comment @ * @;
1.1
date 91.06.17.23.20.31; author boehm; state Exp;
branches;
next ;
desc
@@
1.1
log
@Initial revision
@
text
@/* begincopyright
Copyright (c) 1988,1990 Xerox Corporation. All rights reserved.
Use and copying of this software and preparation of derivative works based
upon this software are permitted. Any distribution of this software or
derivative works must comply with all applicable United States export
control laws. This software is made available AS IS, and Xerox Corporation
makes no warranty about the software, its performance or its conformity to
any specification. Any person obtaining a copy of this software is requested
to send their name and post office or electronic mail address to:
PCR Coordinator
Xerox PARC
3333 Coyote Hill Rd.
Palo Alto, CA 94304
Parts of this software were derived from code bearing the copyright notice:
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
This material may be freely distributed, provided this notice is retained.
This material is provided as is, with no warranty expressed or implied.
Use at your own risk.
endcopyright */
#include "xr/GCPrivate.h"
#include "xr/Threads.h"
#define I_HOLD_ML(ml) (((XR_ML)(ml))->ml_holder == XR_currThread)
#define DEBUG
#undef DEBUG
/* Boehm, October 8, 1990 1:57:26 pm PDT */
/*
* reclaim phase
*
*/
static long GC_last_gc_flushed = 0;
static struct hblk * GC_freeable_blocks = 0;
/* List of large object blocks waiting to be freed. We enqueue these */
/* and then process them in large chunks. Freeing a heap block can */
/* be sufficiently expensive that we don't want to do this with the */
/* world stopped. Processing them one at a time slows things down */
/* because it causes the heap block allocator to lose some state. */
/* Free some or all blocks on list of freeable blocks. Caller should hold */
/* GC_allocate_ml. */
static GC_free_blocks(how_many)
long how_many;
# define ALL 1000000000
# define A_FEW 10
{
    register struct hblk * blk;     /* block being returned to the pool */
    register int n_done;            /* blocks freed so far              */

    /* The heap-block structures may only be touched under the lock. */
    if (!I_HOLD_ML(&GC_allocate_ml)) {
        XR_Panic("GC_free_blocks 0");
    }
    /* Dequeue and free up to how_many blocks; stop early if the */
    /* freeable list runs dry.                                   */
    n_done = 0;
    while (n_done < how_many) {
        blk = GC_freeable_blocks;
        if (blk == 0) return;
        GC_freeable_blocks = hb_sz_link(blk);
        /* Mark it as being uninitialized */
        hb_uninit(blk) = 1;
        /* remove this block from list of active blocks */
        GC_del_hblklist(blk);
        GC_freehblk(blk);
        n_done++;
    }
}
/* Assumes caller holds GC_allocate_ml */
/* Empty both reclaim lists (composite and atomic) and invalidate   */
/* any sweep already in progress on a listed block.                 */
/* Assumes caller holds GC_allocate_ml.                             */
void GC_zero_reclaim_lists()
{
    register struct hblk ** rlp;    /* cursor over list head slots */

    for( rlp = reclaim_list; rlp < &reclaim_list[MAXOBJSZ+1]; rlp++ ) {
        *rlp = (struct hblk *)0;
    }
    for( rlp = areclaim_list; rlp < &areclaim_list[MAXOBJSZ+1]; rlp++ ) {
        *rlp = (struct hblk *)0;
    }
    /* Any GC_reclaim_atomic/GC_reclaim_composite that sampled the   */
    /* counter before this point will notice the change and discard  */
    /* its results.                                                  */
    GC_reclaim_inval_counter++;
}
void GC_reclaim_empty_pages()
/* Reclaim any potentially empty blocks that have not yet been  */
/* reclaimed.                                                   */
/* Caller should hold GC_allocate_ml.                           */
/* Walks both reclaim lists; blocks whose mark bits are all     */
/* clear are unlinked and freed wholesale, nonempty blocks are  */
/* left on the list.                                            */
{
    register struct hblk ** hbpp;   /* link slot of the current block  */
    register struct hblk ** rlp;    /* current reclaim-list head slot  */
    register struct hblk * hbp;     /* current heap block              */
#   ifdef PRINTSTATS
        register long n_reclaimed = 0;
        register long n_tenured = 0;
#   endif

    if (!I_HOLD_ML(&GC_allocate_ml)) {
        XR_Panic("GC_reclaim_empty_pages 0");
    }
    /* Return all queued large-object blocks to the allocator first. */
    GC_free_blocks(ALL);
    /* If nothing has been collected since the last flush, the lists */
    /* have already been processed.                                   */
    if (GC_gc_no == GC_last_gc_flushed) {
        return;
    }
    /* First blocks with composite objects: */
    for( rlp = reclaim_list; rlp < &reclaim_list[MAXOBJSZ+1]; rlp++ ) {
        hbpp = rlp;
        while ((hbp = *hbpp) != (struct hblk *)0) {
            if (GC_hblk_empty(hbp)) {
#               ifdef PRINTSTATS
                    n_reclaimed++;
#               endif
                /* Unlink the block and free it without sweeping */
                /* individual objects.                            */
                * hbpp = hb_sz_link(hbp);
                GC_reclaim_entire_block(hbp);
            } else {
                /* Block still has live objects: leave it linked  */
                /* and advance to the next link slot.             */
                hbpp = & (hb_sz_link(hbp));
#               ifdef PRINTSTATS
                    n_tenured++;
#               endif
            }
        }
    }
    /* Now blocks with atomic objects: */
    for( rlp = areclaim_list; rlp < &areclaim_list[MAXOBJSZ+1]; rlp++ ) {
        hbpp = rlp;
        while ((hbp = *hbpp) != (struct hblk *)0) {
            if (GC_hblk_empty(hbp)) {
#               ifdef PRINTSTATS
                    n_reclaimed++;
#               endif
                * hbpp = hb_sz_link(hbp);
                GC_reclaim_entire_block(hbp);
            } else {
                hbpp = & (hb_sz_link(hbp));
#               ifdef PRINTSTATS
                    n_tenured++;
#               endif
            }
        }
    }
    GC_last_gc_flushed = GC_gc_no;
#   ifdef PRINTSTATS
        GC_vprintf(
"Reclaimed %d apparently empty blocks, skipped %d nonempty blocks\n",
            n_reclaimed, n_tenured);
#   endif
}
/*
* Reclaim all pages that were reclaimed in the last collection cycle.
* Flush the rest.
*
* GC_allocate_ml should not be held by the caller. However,
* we will acquire and release it repeatedly.
* We claim that we don't need the allocation lock for most of this,
* since GC_reclaim_composite and GC_reclaim_atomic are robust
* against being called on an empty list, and we don't write to any
* important data structures. Thus we can't screw up in any
* way that would affect correctness (other than of printed
* statistics). We do guarantee that all reclaim lists are
* empty when we're done, since no other thread will be adding
* to the lists.
*/
/* Sweep blocks that were swept in the last cycle (or are entirely  */
/* empty); drop all other blocks from the reclaim lists.            */
/* Caller must NOT hold GC_allocate_ml; it is acquired as needed.   */
void GC_reclaim_useful_pages()
{
    register struct hblk * hbp;     /* current heap block              */
    register struct hblk ** rlp;    /* current reclaim-list head slot  */
#   ifdef PRINTSTATS
        register long n_reclaimed = 0;
        register long n_tenured = 0;
#   endif

    if (I_HOLD_ML(&GC_allocate_ml)) {
        XR_Panic("GC_reclaim_useful_pages 0");
    }
    /* First blocks with composite objects: */
    for( rlp = reclaim_list; rlp < &reclaim_list[MAXOBJSZ+1]; rlp++ ) {
        while ((hbp = *rlp) != (struct hblk *)0) {
            /* Sweep blocks that were productive last cycle, or that */
            /* are now completely empty; flush the rest.             */
            if (hb_last_reclaimed(hbp) == GC_gc_no-1
                || GC_hblk_empty(hbp)) {
#               ifdef PRINTSTATS
                    n_reclaimed++;
#               endif
                GC_reclaim_composite(rlp);
            } else {
#               ifdef PRINTSTATS
                    n_tenured++;
#               endif
                /* Unlink under the lock; the block stays allocated. */
                XR_MonitorEntry(&GC_allocate_ml);
                *rlp = hb_sz_link(hbp);
                XR_MonitorExit(&GC_allocate_ml);
            }
        }
    }
    /* Now blocks with atomic objects: */
    for( rlp = areclaim_list; rlp < &areclaim_list[MAXOBJSZ+1]; rlp++ ) {
        while ((hbp = *rlp) != (struct hblk *)0) {
            if (hb_last_reclaimed(hbp) == GC_gc_no-1
                || GC_hblk_empty(hbp)) {
#               ifdef PRINTSTATS
                    n_reclaimed++;
#               endif
                GC_reclaim_atomic(rlp);
            } else {
#               ifdef PRINTSTATS
                    n_tenured++;
#               endif
                XR_MonitorEntry(&GC_allocate_ml);
                *rlp = hb_sz_link(hbp);
                XR_MonitorExit(&GC_allocate_ml);
            }
        }
    }
    XR_MonitorEntry(&GC_allocate_ml);
    GC_last_gc_flushed = GC_gc_no;
    GC_reclaim_inval_counter++;
    XR_MonitorExit(&GC_allocate_ml);
    /* BUG FIX: this print was unguarded, but n_reclaimed/n_tenured  */
    /* exist only under PRINTSTATS -- it failed to compile otherwise.*/
#   ifdef PRINTSTATS
        GC_vprintf( "Reclaimed %d new blocks, flushed %d old blocks\n",
                    n_reclaimed, n_tenured);
#   endif
}
/*
* The main routine for the sweep phase
*/
void GC_clear_free_lists()
{
    /* Reset every small-object free list (both composite and */
    /* atomic) to empty.                                      */
    register struct obj **slot;
    register struct obj **limit;

    limit = &objfreelist[MAXOBJSZ+1];
    for (slot = objfreelist; slot < limit; slot++) {
        *slot = (struct obj *)0;
    }
    limit = &aobjfreelist[MAXOBJSZ+1];
    for (slot = aobjfreelist; slot < limit; slot++) {
        *slot = (struct obj *)0;
    }
}
void GC_reclaim()
/* Sweep-phase driver: scan every heap block (via hblkmap) from the */
/* top of the heap downward.  Large-object blocks whose single      */
/* object is unmarked are queued on GC_freeable_blocks; untenured   */
/* small-object blocks are pushed onto the appropriate reclaim      */
/* list for deferred per-object sweeping.                           */
{
    register struct hblk *hbp;  /* ptr to current heap block */
    register unsigned long index;
#   ifdef SEPARATE_HEADERS
        struct hblkhdr * hhdr;
#   endif
    register int sz;            /* size of objects in current block */
    register bool is_atomic;    /* => current block contains ptrfree objs */
    /* Shadow the global heap base in a register for the whole scan. */
    register char * GC_heapstart_reg = GC_heapstart;
#   define GC_heapstart GC_heapstart_reg
#   ifdef PRINTSTATS
        long now_count = 0;     /* large blocks queued for freeing      */
        long defer_count = 0;   /* small-object blocks put on lists     */
        long skip_count = 0;    /* tenured blocks left untouched        */
#   endif
#   ifdef PRINTBLOCKS
        GC_vprintf("reclaim: current block sizes:\n");
#   endif
#   ifdef DEBUG
        GC_vprintf("GC_heapstart = 0x%X, GC_heaplim = 0x%X\n", GC_heapstart,
                   GC_heaplim);
        { int i;
          for (i = 0;
               i < ((long)GC_heaplim - (long)GC_heapstart) / HBLKSIZE;
               i++) {
              GC_vprintf("(%d)", hblkmap[i]);
          }
          GC_vprintf("\n");
        }
#   endif
    /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
    /* For blocks containing small objects, we queue the block for later */
    /* reclamation. */
    /* NOTE(review): rounding uses ~(HBLKMASK-1); GC_revoke_all_tenure  */
    /* uses the same expression -- verify against HBLKMASK's definition.*/
    hbp = (struct hblk *) (((long) GC_heaplim - 1) & ~(HBLKMASK-1));
    index = hbp - (struct hblk *) GC_heapstart;
    /* NOTE(review): loop stops at index > 0, so block 0 is never       */
    /* examined here, while GC_revoke_all_tenure scans index >= 0 --    */
    /* confirm this asymmetry is intentional.                           */
    while (index > 0) {
        switch (hblkmap[index]) {
        case HBLK_INVALID:
            index--;
            break;
        case HBLK_VALID:
            hbp = ((struct hblk *) GC_heapstart) + index;
#           ifdef SEPARATE_HEADERS
                hhdr = headers[index];
                sz = hb_sz_from_hdr(hhdr);
                /* SZ_LINK names the block's free-link field in either   */
                /* header representation.                                */
#               define SZ_LINK hb_sz_link_from_hdr(hhdr)
#           else
                sz = hb_sz(hbp);
#               define SZ_LINK hb_sz_link(hbp)
#           endif
            /* Negative size encodes a pointer-free (atomic) block. */
            if (sz < 0) {
                sz = -sz;
                is_atomic = TRUE;   /* this block contains atomic objs */
            } else {
                is_atomic = FALSE;
            }
            if( sz > MAXOBJSZ ) {   /* 1 big object */
#               ifdef SEPARATE_HEADERS
                    register bool mb = mark_bit_from_hdr(hhdr, HDR_WORDS);
#               else
                    register bool mb = mark_bit(hbp, HDR_WORDS);
#               endif
#               ifdef PRINTSTATS
                    now_count++;
#               endif
#               ifdef PRINTBLOCKS
                    GC_vprintf("%d(%c,%c)",sz, (is_atomic? 'a' : 'c'),
                               mb ? 'n' : 'e' );
#               endif
                if (!mb) {
                    /* Unmarked: queue the whole block for freeing.  */
                    /* Actual freeing is batched in GC_free_blocks.  */
                    GC_mem_found += sz;
                    SZ_LINK = GC_freeable_blocks;
                    GC_freeable_blocks = hbp;
                } /* end if (empty) */
            } else {                /* group of smaller objects */
                if (!(hb_tenured(hbp))) {
#                   ifdef PRINTSTATS
                        defer_count++;
#                   endif
                    /* Defer per-object sweeping: push onto the      */
                    /* reclaim list for this object size.            */
                    if (is_atomic) {
                        SZ_LINK = areclaim_list[sz];
                        areclaim_list[sz] = hbp;
                    } else {
                        SZ_LINK = reclaim_list[sz];
                        reclaim_list[sz] = hbp;
                    }
                } else {
                    /* Tenured block: don't touch it this cycle. */
#                   ifdef PRINTSTATS
                        skip_count++;
#                   endif
                }
            }
            index--;
            break;
        default:
            /* Map entry holds a skip count over non-block pages. */
            index -= hblkmap[index];
        } /* end switch */
    } /* end for */
#   ifdef PRINTSTATS
        GC_printf(
"Directly reclaimed %d blocks, deferred %d blocks, skipped %d blocks\n",
            now_count, defer_count, skip_count);
#   endif
#   undef GC_heapstart
}
/* revoke all tenure */
void GC_revoke_all_tenure()
{
    register struct hblk *hbp;  /* ptr to current heap block */
    register long index;        /* block index, scanned high to low */

    /* Walk every heap block (per hblkmap) and clear its tenured flag. */
    hbp = (struct hblk *) (((long) GC_heaplim - 1) & ~(HBLKMASK-1));
    index = hbp - (struct hblk *) GC_heapstart;
    while (index >= 0) {
        if (hblkmap[index] == HBLK_VALID) {
            hbp = ((struct hblk *) GC_heapstart) + index;
            hb_tenured(hbp) = 0;
            index--;
        } else if (hblkmap[index] == HBLK_INVALID) {
            index--;
        } else {
            /* Map entry is a skip count over non-block pages. */
            index -= hblkmap[index];
        }
    }
}
/* Reclaim all objects in heap block pointed to by given */
/* reclaim list entry, and remove the block from the list. */
/* The block is assumed to contain atomic objects. */
/* Assumes GC_allocate_ml is not held. Acquires it. */
/* Results are discarded if there is any possible */
/* interference with another thread. */
void GC_reclaim_atomic(rlp)
struct hblk **rlp;              /* ptr to reclaim list entry */
{
    register struct hblk * hbp;
    register int word_no;       /* Number of word in block */
    register long i;
    register word *p;           /* pointer to current word in block */
    register int mb;            /* mark bit of current word */
    int sz;                     /* size of objects in current block */
    word *plim;                 /* last word that can start an object */
    int empty;                  /* empty & done with block => block empty */
    struct obj *list;           /* list of free objects in block */
    struct obj *last;           /* last free object on list */
    register int words_found = 0;   /* total words of free objects found */
    unsigned long init_inval_counter;   /* snapshot for interference check */
    bool busy;
#   ifdef SEPARATE_HEADERS
        register struct hblkhdr * hhdr;
#   endif
    /* We acquire the lock ourselves below; caller must not hold it. */
    if (I_HOLD_ML(&GC_allocate_ml)) {
        XR_Panic("GC_reclaim_atomic 0");
    }
#   ifdef GATHERSTATS
        GC_reclaim_count++;
#   endif
    XR_MonitorEntry(&GC_allocate_ml);
    /* Free blocks waiting to be restored to free list while we're in control */
    GC_free_blocks(A_FEW);
    hbp = *rlp;
    if (hbp == NIL) {
        XR_MonitorExit(&GC_allocate_ml);
        return;
    }
#   ifdef VISUALIZE
        /* Make sure it's not dimmed */
        displayAllocHblk(hbp, BYTES_TO_WORDS(HBLKSIZE));
#   endif
    /* Unlink the block; snapshot the counter so we can detect a      */
    /* concurrent GC_zero_reclaim_lists and discard stale results.    */
    * rlp = hb_sz_link(hbp);
    init_inval_counter = GC_reclaim_inval_counter;
#   ifdef SEPARATE_HEADERS
        hhdr = HDR(hbp);
        sz = -(hb_sz_from_hdr(hhdr));   /* atomic blocks store -sz */
        busy = hb_busy_from_hdr(hhdr);
#   else
        sz = -(hb_sz(hbp));
        busy = hb_busy(hbp);
#   endif
    if (busy) {
        /* Another thread is still sweeping this block from an       */
        /* earlier cycle; skip it.                                   */
        XR_ConsoleMsg("Found busy block\n");
        XR_MonitorExit(&GC_allocate_ml);
        return;
    }
#   ifdef SEPARATE_HEADERS
        hb_busy_from_hdr(hhdr) = TRUE;
#   else
        hb_busy(hbp) = TRUE;
#   endif
    /* If this block looks like it will be tenured, avoid touching it. */
    if (GC_hblk_probably_tenurable(hbp)
        && GC_hblk_definitely_tenurable(hbp)) {
#       ifdef GATHERSTATS
            GC_tenure_count++;
#       endif
        hb_tenured(hbp) = 1;
        XR_MonitorExit(&GC_allocate_ml);
        return;
    }
    /* Sweep without the lock; validated against the counter below. */
    XR_MonitorExit(&GC_allocate_ml);
    p = (word *)(hbp->hb_body);
    word_no = ((word *)p) - ((word *)hbp);
    plim = (word *)((((unsigned)hbp) + HBLKSIZE)
                    - WORDS_TO_BYTES(sz));
    last = (struct obj *)0;
    /* Look for first reclaimable object */
    while( p <= plim && last == (struct obj *)0) {
#       ifndef SEPARATE_HEADERS
            mb = mark_bit(hbp, word_no);
#       else
            mb = mark_bit_from_hdr(hhdr, word_no);
#       endif
        if( mb ) {
#           ifdef VISUALIZE
                displayMark(p, sz);
#           endif
            p += sz;
        } else {
            words_found += sz;
            /* word is available - put on list */
#           ifdef DIRTY_BITS
                /* We will write to this page. This is likely to */
                /* generate a write fault. Explicitly dirty the page */
                /* instead. */
                XR_PrepareForWriting(p);
#           endif
            ((struct obj *)p)->obj_link = (struct obj *)0;
            last = list = ((struct obj *)p);
#           ifdef VISUALIZE
                displayFree(p, sz);
#           endif
            p += sz;
        }
        word_no += sz;
    }
    /* Look for remaining reclaimable objects */
    while( p <= plim ) {
#       ifndef SEPARATE_HEADERS
            mb = mark_bit(hbp, word_no);
#       else
            mb = mark_bit_from_hdr(hhdr, word_no);
#       endif
        if( mb ) {
#           ifdef VISUALIZE
                displayMark(p, sz);
#           endif
            p += sz;
        } else {
            words_found += sz;
            /* object is available - put on list (prepend; 'last' */
            /* still points at the tail of the list).             */
            ((struct obj *)p)->obj_link = list;
            list = ((struct obj *)p);
#           ifdef VISUALIZE
                displayFree(p, sz);
#           endif
            p += sz;
        }
        word_no += sz;
    }
    /* Block is empty iff every scanned word went onto the free list. */
    empty = (p - (word *)(hbp->hb_body) == words_found);
    /*
     * if block has reachable words in it, we can't reclaim the
     * whole thing so put list of free words in block back on
     * free list for this size.
     */
#   ifdef PRINTBLOCKS
        GC_vprintf("%d(a,%c)",sz, (empty ? 'e' : 'n') );
#   endif
    XR_MonitorEntry(&GC_allocate_ml);
    hb_busy(hbp) = FALSE;
    if (init_inval_counter != GC_reclaim_inval_counter) {
        /* Reclaim lists were zeroed while we swept: discard results. */
        XR_ConsoleMsg("Found incremented reclaim counter\n");
        XR_MonitorExit(&GC_allocate_ml);
        return;
    }
    if (empty) {
        hb_uninit(hbp) = 1;
        /* remove this block from list of active blocks and free it */
        {
            GC_del_hblklist(hbp);
            GC_freehblk(hbp);
            GC_mem_found += words_found;
        }
    } else { /* ! empty */
        /* We checked for tenurable above, but only checked for definitely
           tenurable if the block was probably tenurable.  Thus, it may happen
           that a block is not probably tenurable, but against all probability
           is indeed tenurable.  That's what this test here is for.
        */
        /* NOTE(review): assumes TENURE(0) is true, so 'last' is     */
        /* non-null whenever this branch is taken -- confirm TENURE. */
        if (!TENURE(words_found)) {
            last -> obj_link = aobjfreelist[sz];
            aobjfreelist[sz] = list;
            GC_mem_found += words_found;
        } else {
            /* Throw away what we found, since we don't */
            /* want to keep dirtying the page. */
#           ifdef GATHERSTATS
                GC_tenure_count++;
#           endif
#           ifdef VISUALIZE
                dim_page(hbp);
#           endif
            hb_tenured(hbp) = 1;
        }
        hb_last_reclaimed(hbp) = GC_gc_no;
    }
    XR_MonitorExit(&GC_allocate_ml);
}
/* Reclaim all objects in heap block pointed to by given */
/* reclaim list entry, and remove the block from the list. */
/* The block is assumed to contain composite objects. */
/* Assumes GC_allocate_ml is not held. Acquires it. */
/* Results are discarded if there is any possible */
/* interference with another thread. */
void GC_reclaim_composite(rlp)
struct hblk **rlp;              /* ptr to reclaim list entry */
{
    register struct hblk * hbp;
    register int word_no;       /* Number of word in block */
    register long i;            /* scratch: end address of object being cleared */
    register word *p;           /* pointer to current word in block */
    register int mb;            /* mark bit of current word */
    int sz;                     /* size of objects in current block */
    word *plim;                 /* last word that can start an object */
    int empty;                  /* empty & done with block => block empty */
    struct obj *list;           /* list of free objects in block */
    struct obj *last;           /* last free object on list */
    register int words_found = 0;   /* total words of free objects found */
    unsigned long init_inval_counter;   /* snapshot for interference check */
    bool busy;
#   ifdef SEPARATE_HEADERS
        register struct hblkhdr * hhdr;
#   endif
    /* FIX: this check existed in GC_reclaim_atomic but was missing  */
    /* here; the caller must not already hold the allocation lock.   */
    if (I_HOLD_ML(&GC_allocate_ml)) {
        XR_Panic("GC_reclaim_composite 0");
    }
#   ifdef GATHERSTATS
        GC_reclaim_count++;
#   endif
    XR_MonitorEntry(&GC_allocate_ml);
    /* Free blocks waiting to be restored to free list while we're in control */
    GC_free_blocks(A_FEW);
    hbp = *rlp;
    if (hbp == NIL) {
        XR_MonitorExit(&GC_allocate_ml);
        return;
    }
#   ifdef VISUALIZE
        /* Make sure it's not dimmed */
        displayAllocHblk(hbp, BYTES_TO_WORDS(HBLKSIZE));
#   endif
    /* Unlink the block; snapshot the counter so we can detect a      */
    /* concurrent GC_zero_reclaim_lists and discard stale results.    */
    * rlp = hb_sz_link(hbp);
    init_inval_counter = GC_reclaim_inval_counter;
#   ifdef SEPARATE_HEADERS
        hhdr = HDR(hbp);
        sz = hb_sz_from_hdr(hhdr);
        busy = hb_busy_from_hdr(hhdr);
#   else
        sz = hb_sz(hbp);
        busy = hb_busy(hbp);
#   endif
    if (busy) {
        /* Another thread is still sweeping this block from an       */
        /* earlier cycle; skip it.                                   */
        XR_ConsoleMsg("Found busy block\n");
        XR_MonitorExit(&GC_allocate_ml);
        return;
    }
#   ifdef SEPARATE_HEADERS
        hb_busy_from_hdr(hhdr) = TRUE;
#   else
        hb_busy(hbp) = TRUE;
#   endif
    /* If this block looks like it will be tenured, avoid touching it. */
    if (GC_hblk_probably_tenurable(hbp)
        && GC_hblk_definitely_tenurable(hbp)) {
#       ifdef GATHERSTATS
            GC_tenure_count++;
#       endif
        hb_tenured(hbp) = 1;
        XR_MonitorExit(&GC_allocate_ml);
        return;
    }
    /* Sweep without the lock; validated against the counter below. */
    XR_MonitorExit(&GC_allocate_ml);
    p = (word *)(hbp->hb_body);
    word_no = ((word *)p) - ((word *)hbp);
    plim = (word *)((((unsigned)hbp) + HBLKSIZE)
                    - WORDS_TO_BYTES(sz));
    last = (struct obj *)0;
    /* Look for first reclaimable object */
    while( p <= plim && last == (struct obj *)0) {
#       ifndef SEPARATE_HEADERS
            mb = mark_bit(hbp, word_no);
#       else
            mb = mark_bit_from_hdr(hhdr, word_no);
#       endif
        if( mb ) {
#           ifdef VISUALIZE
                displayMark(p, sz);
#           endif
            p += sz;
        } else {
            words_found += sz;
#           ifdef VISUALIZE
                displayFree(p, sz);
#           endif
            /* word is available - put on list */
#           ifdef DIRTY_BITS
                /* We will write to this page. This is likely to */
                /* generate a write fault. Explicitly dirty the page */
                /* instead. */
                XR_PrepareForWriting(p);
#           endif
            ((struct obj *)p)->obj_link = (struct obj *)0;
            last = list = ((struct obj *)p);
            /* Clear object, advance p to next object in the process */
            i = (long)(p + sz);
            p++;        /* Skip link field */
            while (p < (word *)i) {
                *p++ = 0;
            }
        }
        word_no += sz;
    }
    /* Look for remaining reclaimable objects */
    while( p <= plim ) {
#       ifndef SEPARATE_HEADERS
            mb = mark_bit(hbp, word_no);
#       else
            mb = mark_bit_from_hdr(hhdr, word_no);
#       endif
        if( mb ) {
            /* FIX: display the marked object at its own address      */
            /* BEFORE advancing p (previously p was incremented       */
            /* first, so VISUALIZE highlighted the wrong words;       */
            /* the first loop and GC_reclaim_atomic do it this way).  */
#           ifdef VISUALIZE
                displayMark(p, sz);
#           endif
            p += sz;
        } else {
            words_found += sz;
#           ifdef VISUALIZE
                displayFree(p, sz);
#           endif
            /* object is available - put on list (prepend; 'last'    */
            /* still points at the tail of the list).                 */
            ((struct obj *)p)->obj_link = list;
            list = ((struct obj *)p);
            /* Clear object, advance p to next object in the process */
            i = (long)(p + sz);
            p++;        /* Skip link field */
            while (p < (word *)i) {
                *p++ = 0;
            }
        }
        word_no += sz;
    }
    /* Block is empty iff every scanned word went onto the free list. */
    empty = (p - (word *)(hbp->hb_body) == words_found);
    /*
     * if block has reachable words in it, we can't reclaim the
     * whole thing so put list of free words in block back on
     * free list for this size.
     */
#   ifdef PRINTBLOCKS
        GC_vprintf("%d(c,%c)",sz, (empty ? 'e' : 'n') );
#   endif
    XR_MonitorEntry(&GC_allocate_ml);
    hb_busy(hbp) = FALSE;
    if (init_inval_counter != GC_reclaim_inval_counter) {
        /* Reclaim lists were zeroed while we swept: discard results. */
        XR_ConsoleMsg("Found incremented reclaim counter\n");
        XR_MonitorExit(&GC_allocate_ml);
        return;
    }
    if (empty) {
        /* Clear words at beginning of objects */
        /* Since most of it is already cleared */
        p = (word *)(hbp->hb_body);
        plim = (word *)((((unsigned)hbp) + HBLKSIZE)
                        - WORDS_TO_BYTES(sz));
        while (p <= plim) {
            *p = 0;
            p += sz;
        }
        /* Body is now fully zeroed, so the block counts as           */
        /* initialized (unlike the atomic case, which sets uninit).   */
        hb_uninit(hbp) = 0;
        /* remove this block from list of active blocks and free it */
        {
            GC_del_hblklist(hbp);
            GC_freehblk(hbp);
            GC_mem_found += words_found;
        }
    } else { /* ! empty */
        /* Add to the appropriate free list, unless it's too full */
        /* We claim that we don't lose much if this gets interrupted */
        /* in the middle. */
        /* We checked for tenurable above, but only checked for definitely
           tenurable if the block was probably tenurable.  Thus, it may happen
           that a block is not probably tenurable, but against all probability
           is indeed tenurable.  That's what this test here is for.
        */
        if (!TENURE(words_found)) {
            last -> obj_link = objfreelist[sz];
            objfreelist[sz] = list;
            GC_mem_found += words_found;
            /* ...and here is where we need to fluff up the mark bits for
               two-collection survival policy */
        } else {
            /* Throw away what we found, since we don't */
            /* want to keep dirtying the page. */
#           ifdef GATHERSTATS
                GC_tenure_count++;
#           endif
#           ifdef VISUALIZE
                dim_page(hbp);
#           endif
            hb_tenured(hbp) = 1;
        }
        hb_last_reclaimed(hbp) = GC_gc_no;
    }
    XR_MonitorExit(&GC_allocate_ml);
}
/* Return an entire small object block to the free memory pool. */
/* it had better be empty. Otherwise we make no assumptions. */
/* We try not to touch the block itself. */
void GC_reclaim_entire_block(hbp)
register struct hblk *hbp;
{
    short size;     /* object size for this block (sign stripped) */

    size = hb_sz(hbp);
    if (hb_busy(hbp)) {
        XR_ConsoleMsg("GC_reclaim_entire_block encountered busy\n");
        /* Some one is still busy reclaiming this from a previous */
        /* collection cycle, and may be writing a free list in it. */
        /* He'll eventually look at GC_reclaim_inval_counter and */
        /* discard it.  Then we'll catch it the next time. */
        return;
    }
    /* A negative size marks an atomic block; only the magnitude */
    /* matters here.                                             */
    if (size < 0) {
        size = -size;
    }
    hb_uninit(hbp) = 1;
    GC_del_hblklist(hbp);
    GC_freehblk(hbp);
    /* Credit every whole object that fit in the block body. */
    GC_mem_found += (BODY_SZ - (BODY_SZ % size));
}
/* Check whether a heap block is definitely empty. */
/* Hbp points to a block containing small objects. */
/* May (with tiny probability) identify an empty */
/* block as nonempty. */
bool GC_hblk_empty(hbp)
register struct hblk *hbp;      /* ptr to current heap block */
{
    register long * word_p = hb_marks(hbp);
    register long * word_lim = word_p + MARK_BITS_SZ;

    /* Make a quick attempt at ignoring bogus mark bits at the beginning. */
    /* NOTE(review): masks the low half of the first mark word (or skips */
    /* it entirely) -- assumes header words map to those bit positions;  */
    /* note this WRITES to the mark array.                               */
    if (HDR_WORDS >= 16) {
        if (HDR_WORDS >= 32) {
            word_p++;
        } else {
            *word_p &= 0xffff0000;
        }
    }
    /* Any remaining nonzero mark word means a live object. */
    while (word_p < word_lim) {
        if (*word_p != 0) {
            return(FALSE);
        }
        word_p++;
    }
    return(TRUE);
}
/* Check whether a heap block is likely to be tenurable. */
/* Hbp points to a block containing small objects. */
/* Only a hint. May err on either side. */
bool GC_hblk_probably_tenurable(hbp)
register struct hblk *hbp;      /* ptr to current heap block */
{
    register int sz;            /* size of objects in current block */
#   define start_word_no HDR_WORDS
    register int nobjs;         /* number of objects in the block */
    register int middle_obj;    /* word index of an object ~1/2 through */
    register int beginning_obj; /* word index of an object ~1/4 through */
    sz = hb_sz(hbp);
    if (sz < 0) sz = -sz;       /* negative size encodes an atomic block */
    /* Compute nobjs and the word index of the object roughly a     */
    /* quarter of the way through the block.  Common sizes are      */
    /* special-cased with shifts/masks to avoid the general IDIV.   */
    switch (sz) {
      case 1:
        nobjs = (BYTES_TO_WORDS(HBLKSIZE) - start_word_no);
        beginning_obj = start_word_no + ((nobjs - 1) >> 2);
        break;
      case 2:
        nobjs = (BYTES_TO_WORDS(HBLKSIZE) - start_word_no)/2;
        beginning_obj = start_word_no + (((nobjs - 1) >> 2) << 1);
        break;
      case 4:
        nobjs = (BYTES_TO_WORDS(HBLKSIZE) - start_word_no)/4;
        beginning_obj = start_word_no + ((nobjs - 1) & ~3);
        break;
      case 6:
        nobjs = (BYTES_TO_WORDS(HBLKSIZE) - start_word_no)/6;
        beginning_obj = start_word_no + ((nobjs - 1) >> 2) * 6;
        break;
      case 8:
        nobjs = (BYTES_TO_WORDS(HBLKSIZE) - start_word_no)/8;
        beginning_obj = start_word_no + (((nobjs - 1) >> 2) << 3);
        break;
      case 16:
        nobjs = (BYTES_TO_WORDS(HBLKSIZE) - start_word_no)/16;
        beginning_obj = start_word_no + (((nobjs - 1) >> 2) << 4);
        break;
      default:
        nobjs = IDIV((BYTES_TO_WORDS(HBLKSIZE) - start_word_no),sz);
        beginning_obj = start_word_no + ((nobjs - 1) >> 2) * sz;
    }
    /* Sample two objects; if either is unmarked, probably not tenurable. */
    if (!mark_bit(hbp, beginning_obj)) return(FALSE);
    /* NOTE(review): this equals start_word_no + 2*((nobjs-1)>>2)*sz,   */
    /* a slight approximation of the commented formula -- acceptable    */
    /* since this routine is documented as a hint that may err.         */
    middle_obj = /* start_word_no + ((nobjs - 1) >> 1) * sz */
                 (beginning_obj << 1) - start_word_no;
    if (!mark_bit(hbp, middle_obj)) return(FALSE);
    return(TRUE);
}
/* Check whether a heap block is definitely tenurable. */
/* Hbp points to a block containing small objects. */
/* We do not dirty the heap block in question. */
bool GC_hblk_definitely_tenurable(hbp)
register struct hblk *hbp;      /* ptr to current heap block */
{
    register int word_no;       /* Number of word in block */
    register int sz;            /* size of objects in current block */
#   define start_word_no HDR_WORDS
    int nobjs;                  /* number of objects in the block */
    register int n_empty = 0;   /* Number of reclaimable words. */
#   ifdef SEPARATE_HEADERS
        register struct hblkhdr * hhdr = HDR(hbp);
#   endif

    sz = hb_sz(hbp);
    if (sz < 0) sz = -sz;       /* negative size encodes an atomic block */
    nobjs = IDIV((BYTES_TO_WORDS(HBLKSIZE) - start_word_no), sz);
    /* Scan objects from the last one back to the first, summing the */
    /* words occupied by unmarked (reclaimable) objects.             */
    for (word_no = start_word_no + (nobjs - 1) * sz;
         word_no >= start_word_no;
         word_no -= sz) {
#       ifndef SEPARATE_HEADERS
            if (!mark_bit(hbp, word_no)) {
                n_empty += sz;
            }
#       else
            if (!mark_bit_from_hdr(hhdr, word_no)) {
                n_empty += sz;
            }
#       endif
    }
    /* Tenure iff too few reclaimable words to be worth sweeping. */
    return(TENURE(n_empty));
}
@